pw.perm = length(which(Zwv>=Zw))/perm
rw = c(rw, list(pval.perm=pw.perm))
}
if (test.type=="all" || test.type=="maxtype" || test.type=="m"){
pm.perm = length(which(Mv>=M))/perm
rmax = c(rmax, list(pval.perm=pm.perm))
}
}
r = list()
if (test.type=="all" || test.type=="original" || test.type=="o"){
r = c(r,list(original=ro))
}
if (test.type=="all" || test.type=="generalized" || test.type=="g"){
r = c(r,list(generalized=rg))
}
if (test.type=="all" || test.type=="weighted" || test.type=="w"){
r = c(r,list(weighted=rw))
}
if (test.type=="all" || test.type=="maxtype" || test.type=="m"){
r = c(r,list(maxtype=rmax))
}
return(r)
}
## supporting function
# Count within-group edges of a graph for the two-sample edge-count tests.
#
# E  : two-column matrix; each row holds the node labels of one edge.
# G1 : vector of node labels belonging to the first sample.
#
# Returns list(R1, R2) where
#   R1 = number of edges with BOTH endpoints in G1,
#   R2 = number of edges with NEITHER endpoint in G1.
getR1R2 = function(E, G1){
  # Vectorized membership test replaces the original row-by-row loop.
  # This also fixes the zero-edge case: the old `for (i in 1:nrow(E))`
  # iterated over c(1, 0) when nrow(E) == 0 and indexed out of bounds.
  in1 = E[, 1] %in% G1
  in2 = E[, 2] %in% G1
  return(list(R1 = sum(in1 & in2), R2 = sum(!in1 & !in2)))
}
library(cluster)
library(vegan)
library(mnormt)
########################################################
#Power of the Chen-Friedman Test for the table in Figure 6 (normal location).
# Monte Carlo power study of the generalized edge-count test (g.tests,
# defined earlier in this file) on the Euclidean minimum spanning tree.
# For each dimension d[j]: simulate the null distribution of the statistic,
# then its distribution under a per-coordinate mean shift of h/sqrt(m+n),
# and report the fraction exceeding the empirical 95% null quantile.
d=c(10, 30, 50, 70, 100)
m=60
n=40
# NOTE(review): `iter` is not used in this section (`iterations` is) — confirm.
iter=500
iterations=100
h=2
power=vector(length=5)
rp=rp1=vector(length=iterations)
for(j in 1:5)
{
# Mean-shift vector defining the alternative hypothesis.
delta1<-h*rep(1, d[j])/sqrt(m+n)
mu0=rep(0, d[j])
mu1=delta1
sigma0=diag(rep(1, d[j]))
# Null distribution: both samples drawn from N(mu0, I).
for(i in 1:iterations)
{
points1<-rmnorm(m, mu0, sigma0)
points2<-rmnorm(n, mu0, sigma0)
points<-rbind(points1,points2)
# Pairwise Euclidean distances, then the MST: spantree()$kid holds the
# parent of each node 2..(m+n), giving the MST edge list after transposing.
x<-as.matrix(vegdist(points, binary=FALSE, method="euclidean"))
child<-spantree(x)$kid
edges<-t(rbind(c(2:(m+n)), child))
rp1[i]<-g.tests(edges, c(1:m), c((m+1):(m+n)), test.type="g", perm=0)$generalized$test.statistic
}
# Alternative: second sample shifted to N(mu1, I).
for(i in 1:iterations)
{
points1<-rmnorm(m, mu0, sigma0)
points2<-rmnorm(n, mu1, sigma0)
points<-rbind(points1,points2)
x<-as.matrix(vegdist(points, binary=FALSE, method="euclidean"))
child<-spantree(x)$kid
edges<-t(rbind(c(2:(m+n)), child))
rp[i]<-g.tests(edges, c(1:m), c((m+1):(m+n)), test.type="g", perm=0)$generalized$test.statistic
}
# Empirical power at level 0.05 (upper tail of the simulated null).
power[j]<-length(which(rp> quantile(rp1, 0.95)))/iterations
}
write(as.vector(power), file="FR_NewNormal_HD.txt", ncolumns=1)
##################################################################
#Power of the Chen-Friedman Test for the table in Figure 6 (normal scale).
# Same design as the location section above, but the alternative inflates
# the covariance: sigma1 = (1 + h/sqrt(m+n)) * I instead of shifting the mean.
library(cluster)
library(vegan)
library(mnormt)
d=c(10, 30, 50, 70, 100)
m=60
n=40
# NOTE(review): `iter` is not used in this section (`iterations` is) — confirm.
iter=500
iterations=100
h=2
# BUG FIX: was vector(length=2) although the loop below fills power[1..5];
# all sibling sections allocate length 5.
power=vector(length=5)
rp=rp1=vector(length=iterations)
for(j in 1:5)
{
mu0=rep(0, d[j])
# Scale inflation for the alternative covariance.
delta1<-h/(m+n)^(1/2)
sigma0=diag(rep(1, d[j]))
sigma1=sigma0+delta1*diag(rep(1, d[j]))
# Null distribution: both samples N(mu0, sigma0).
for(i in 1:iterations)
{
points1<-rmnorm(m, mu0, sigma0)
points2<-rmnorm(n, mu0, sigma0)
points<-rbind(points1,points2)
# Euclidean MST edge list: spantree()$kid holds the parent of nodes 2..(m+n).
x<-as.matrix(vegdist(points, binary=FALSE, method="euclidean"))
child<-spantree(x)$kid
edges<-t(rbind(c(2:(m+n)), child))
rp1[i]<-g.tests(edges, c(1:m), c((m+1):(m+n)), test.type="g", perm=0)$generalized$test.statistic
}
# Alternative: second sample has inflated covariance sigma1.
for(i in 1:iterations)
{
points1<-rmnorm(m, mu0, sigma0)
points2<-rmnorm(n, mu0, sigma1)
points<-rbind(points1,points2)
x<-as.matrix(vegdist(points, binary=FALSE, method="euclidean"))
child<-spantree(x)$kid
edges<-t(rbind(c(2:(m+n)), child))
rp[i]<-g.tests(edges, c(1:m), c((m+1):(m+n)), test.type="g", perm=0)$generalized$test.statistic
}
# Empirical power at level 0.05 (upper tail of the simulated null).
power[j]<-length(which(rp> quantile(rp1, 0.95)))/iterations
}
write(as.vector(power), file="FR_NewScale_HD.txt", ncolumns=1)
##################################################################
#Power of the Chen-Friedman Test for the table in Figure 7 (lognormal location).
# Same design as the normal-location section above, but the data are
# lognormal: exp() of multivariate normal draws, with a per-coordinate
# log-mean shift of h/sqrt(m+n) under the alternative.
library(cluster)
library(vegan)
library(mnormt)
d=c(10, 30, 50, 70, 100)
m=60
n=40
# NOTE(review): `iter` is not used in this section (`iterations` is) — confirm.
iter=500
iterations=100
h=2
power=vector(length=5)
rp=rp1=vector(length=iterations)
for(j in 1:5)
{
# Log-scale mean shift defining the alternative.
delta1<-h*rep(1, d[j])/sqrt(m+n)
mu0=rep(0, d[j])
mu1=delta1
sigma0=diag(rep(1, d[j]))
# Null distribution: both samples exp(N(mu0, I)).
for(i in 1:iterations)
{
points1<-exp(rmnorm(m, mu0, sigma0))
points2<-exp(rmnorm(n, mu0, sigma0))
points<-rbind(points1,points2)
# Euclidean MST edge list: spantree()$kid holds the parent of nodes 2..(m+n).
x<-as.matrix(vegdist(points, binary=FALSE, method="euclidean"))
child<-spantree(x)$kid
edges<-t(rbind(c(2:(m+n)), child))
rp1[i]<-g.tests(edges, c(1:m), c((m+1):(m+n)), test.type="g", perm=0)$generalized$test.statistic
}
# Alternative: second sample exp(N(mu1, I)).
for(i in 1:iterations)
{
points1<-exp(rmnorm(m, mu0, sigma0))
points2<-exp(rmnorm(n, mu1, sigma0))
points<-rbind(points1,points2)
x<-as.matrix(vegdist(points, binary=FALSE, method="euclidean"))
child<-spantree(x)$kid
edges<-t(rbind(c(2:(m+n)), child))
rp[i]<-g.tests(edges, c(1:m), c((m+1):(m+n)), test.type="g", perm=0)$generalized$test.statistic
}
# Empirical power at level 0.05 (upper tail of the simulated null).
power[j]<-length(which(rp> quantile(rp1, 0.95)))/iterations
}
write(as.vector(power), file="FR_NewLogNormal_HD.txt", ncolumns=1)
library(cluster)
library(mnormt)
library(localdepth)
library(fda.usc)
########################################################
#Power of the test based on the halfspace depth for the table in Figure 6 (normal location).
#Can be changed to obtain the power of the test based on the Mahalanobis depth by changing mdepth.TD to mdepth.MhD below.
# Statistic per replication: the fraction of pairs (a in sample 1, b in
# sample 2) with depth(a) < depth(b), both depths computed with respect to
# sample 1. Power uses two-sided empirical 2.5%/97.5% null cutoffs.
d=c(10, 30, 50, 70, 100)
m=60
n=40
iter=500
h=2
power=vector(length=5)
rp=rp1=vector(length=iter)
for(j in 1:5)
{
# Mean-shift vector defining the alternative.
delta1<-h*rep(1, d[j])/sqrt(m+n)
mu0=rep(0, d[j])
mu1=delta1
sigma0=diag(rep(1, d[j]))
# Null distribution: both samples N(mu0, I).
for(i in 1:iter)
{
points1<-rmnorm(m, mu0, sigma0)
points2<-rmnorm(n, mu0, sigma0)
# Tukey (halfspace) depth of each point, relative to sample 1.
a<-mdepth.TD(points1, points1, scale=TRUE)$dep
b<-mdepth.TD(points2, points1, scale=TRUE)$dep
rp1[i]<-sum(outer(a,b,"<")*1)/(m*n)
}
# Alternative: second sample mean-shifted by mu1.
for(i in 1:iter)
{
points1<-rmnorm(m, mu0, sigma0)
points2<-rmnorm(n, mu1, sigma0)
a<-mdepth.TD(points1, points1, scale=TRUE)$dep
b<-mdepth.TD(points2, points1, scale=TRUE)$dep
rp[i]<-sum(outer(a,b,"<")*1)/(m*n)
}
# Two-sided empirical power at level 0.05.
power[j]<-(length(which(rp> quantile(rp1, 0.975)))+length(which(rp < quantile(rp1, 0.025))))/iter
print(power[j])
}
write(as.vector(power), file="Normal_TD_HD.txt", ncolumns=1)
############################################################
#Power of the test based on the halfspace depth for the table in Figure 6 (normal scale).
#Can be changed to obtain the power of the test based on the Mahalanobis depth by changing mdepth.TD to mdepth.MhD below.
# Statistic per replication: the fraction of pairs (a in sample 1, b in
# sample 2) with depth(a) < depth(b), both depths computed with respect to
# sample 1. Power uses two-sided empirical 2.5%/97.5% null cutoffs.
library(cluster)
library(mnormt)
library(localdepth)
library(fda.usc)
d=c(10, 30, 50, 70, 100)
m=60
n=40
iter=500
h=2
power=vector(length=5)
rp=rp1=vector(length=iter)
for(j in 1:5)
{
mu0=rep(0, d[j])
# Scale inflation for the alternative covariance.
delta1<-h/(m+n)^(1/2)
sigma0=diag(rep(1, d[j]))
sigma1=sigma0+delta1*diag(rep(1, d[j]))
# Null distribution: both samples N(mu0, sigma0).
for(i in 1:iter)
{
points1<-rmnorm(m, mu0, sigma0)
points2<-rmnorm(n, mu0, sigma0)
# Tukey (halfspace) depth of each point, relative to sample 1.
a<-mdepth.TD(points1, points1, scale=TRUE)$dep
b<-mdepth.TD(points2, points1, scale=TRUE)$dep
rp1[i]<-sum(outer(a,b,"<")*1)/(m*n)
}
# Alternative: second sample has inflated covariance sigma1.
for(i in 1:iter)
{
points1<-rmnorm(m, mu0, sigma0)
points2<-rmnorm(n, mu0, sigma1)
a<-mdepth.TD(points1, points1, scale=TRUE)$dep
b<-mdepth.TD(points2, points1, scale=TRUE)$dep
rp[i]<-sum(outer(a,b,"<")*1)/(m*n)
}
# Two-sided empirical power at level 0.05.
power[j]<-(length(which(rp> quantile(rp1, 0.975)))+length(which(rp < quantile(rp1, 0.025))))/iter
print(power[j])
}
write(as.vector(power), file="Scale_TD_HD.txt", ncolumns=1)
############################################################
#Power of the test based on the halfspace depth for the table in Figure 7 (lognormal location).
#Can be changed to obtain the power of the test based on the Mahalanobis depth by changing mdepth.TD to mdepth.MhD below.
# Statistic per replication: the fraction of pairs (a in sample 1, b in
# sample 2) with depth(a) < depth(b), both depths computed with respect to
# sample 1. Power uses two-sided empirical 2.5%/97.5% null cutoffs.
library(cluster)
library(mnormt)
library(localdepth)
library(fda.usc)
d=c(10, 30, 50, 70, 100)
m=60
n=40
iter=500
h=2
power=vector(length=5)
rp=rp1=vector(length=iter)
for(j in 1:5)
{
# Log-scale mean shift defining the alternative.
delta1<-h*rep(1, d[j])/sqrt(m+n)
mu0=rep(0, d[j])
mu1=delta1
sigma0=diag(rep(1, d[j]))
# Null distribution: both samples lognormal with log-mean mu0.
for(i in 1:iter)
{
points1<-exp(rmnorm(m, mu0, sigma0))
points2<-exp(rmnorm(n, mu0, sigma0))
a<-mdepth.TD(points1, points1, scale=TRUE)$dep
b<-mdepth.TD(points2, points1, scale=TRUE)$dep
rp1[i]<-sum(outer(a,b,"<")*1)/(m*n)
}
# Alternative: log-mean shift mu1 in the second sample.
for(i in 1:iter)
{
points1<-exp(rmnorm(m, mu0, sigma0))
points2<-exp(rmnorm(n, mu1, sigma0))
a<-mdepth.TD(points1, points1, scale=TRUE)$dep
b<-mdepth.TD(points2, points1, scale=TRUE)$dep
rp[i]<-sum(outer(a,b,"<")*1)/(m*n)
}
power[j]<-(length(which(rp> quantile(rp1, 0.975)))+length(which(rp < quantile(rp1, 0.025))))/iter
# BUG FIX: print(power[j]) previously sat inside the alternative loop,
# printing a stale value `iter` times before power[j] was assigned; moved
# here. The redundant `rp=rpnull=vector(length=iter)` re-allocation was
# also removed (rp is allocated above and rpnull was never used).
print(power[j])
}
# NOTE(review): unlike the other sections, this one never writes `power`
# to a file — confirm whether a write(...) call is missing.
#Power of the Hotelling T2 test for the table in Figure 7 (lognormal location).
# Two-sample T2 statistic with pooled covariance; the critical value is the
# empirical 95% quantile of the statistic simulated under the null model.
d=c(10, 30, 50, 70, 100)
m=60
n=40
iter=500
h=2
power=vector(length=5)
rp=rp1=vector(length=iter)
for(j in 1:5)
{
# Log-scale mean shift defining the alternative.
delta1<-h*rep(1, d[j])/sqrt(m+n)
mu0=rep(0, d[j])
mu1=delta1
sigma0=diag(rep(1, d[j]))
# Null distribution: BOTH samples lognormal with log-mean mu0.
# BUG FIX: the null loop previously drew plain normal samples while the
# alternative loop drew lognormal ones (all sibling lognormal sections use
# exp() in both loops), so the 95% cutoff came from the wrong null model;
# exp(...) added to match the lognormal setting.
for(i in 1:iter)
{
points1<-exp(rmnorm(m, mu0, sigma0))
points2<-exp(rmnorm(n, mu0, sigma0))
xmean<-apply(points1, 2, mean)
ymean<-apply(points2, 2, mean)
diff<-xmean-ymean
# Pooled covariance scaled for the two-sample mean difference.
vc<-(1/m+1/n)*(((m-1)*cov(points1)+(n-1)*cov(points2))/(m+n-2))
rp1[i]<-t(diff)%*%solve(vc)%*%diff
}
# Alternative: log-mean shift mu1 in the second sample.
for(i in 1:iter)
{
points1<-exp(rmnorm(m, mu0, sigma0))
points2<-exp(rmnorm(n, mu1, sigma0))
xmean<-apply(points1, 2, mean)
ymean<-apply(points2, 2, mean)
diff<-xmean-ymean
vc<-(1/m+1/n)*(((m-1)*cov(points1)+(n-1)*cov(points2))/(m+n-2))
rp[i]<-t(diff)%*%solve(vc)%*%diff
}
# Empirical power at level 0.05 (upper tail of the simulated null).
power[j]<-length(which(rp> quantile(rp1, 0.95)))/iter
}
write(as.vector(power), file="T2Lognormal_HD.txt", ncolumns=1)
library(cluster)
library(mnormt)
library(localdepth)
library(fda.usc)
# Power of the halfspace-depth test, lognormal location alternative.
# NOTE(review): appears to duplicate the lognormal halfspace-depth section
# above; the result is printed but never written to a file — confirm intent.
d=c(10, 30, 50, 70, 100)
m=60
n=40
iter=500
h=2
power=vector(length=5)
rp=rp1=vector(length=iter)
for(j in 1:5)
{
# Log-scale mean shift defining the alternative.
delta1<-h*rep(1, d[j])/sqrt(m+n)
mu0=rep(0, d[j])
mu1=delta1
sigma0=diag(rep(1, d[j]))
# Null distribution: both samples lognormal with log-mean mu0.
for(i in 1:iter)
{
points1<-exp(rmnorm(m, mu0, sigma0))
points2<-exp(rmnorm(n, mu0, sigma0))
a<-mdepth.TD(points1, points1, scale=TRUE)$dep
b<-mdepth.TD(points2, points1, scale=TRUE)$dep
rp1[i]<-sum(outer(a,b,"<")*1)/(m*n)
}
# Alternative: log-mean shift mu1 in the second sample.
for(i in 1:iter)
{
points1<-exp(rmnorm(m, mu0, sigma0))
points2<-exp(rmnorm(n, mu1, sigma0))
a<-mdepth.TD(points1, points1, scale=TRUE)$dep
b<-mdepth.TD(points2, points1, scale=TRUE)$dep
rp[i]<-sum(outer(a,b,"<")*1)/(m*n)
}
power[j]<-(length(which(rp> quantile(rp1, 0.975)))+length(which(rp < quantile(rp1, 0.025))))/iter
# BUG FIX: print(power[j]) previously sat inside the alternative loop,
# printing a stale value `iter` times before power[j] was assigned; moved
# here. The redundant `rp=rpnull=vector(length=iter)` re-allocation was
# also removed (rp is allocated above and rpnull was never used).
print(power[j])
}
library(cluster)
library(mnormt)
library(localdepth)
library(fda.usc)
# Power of the halfspace-depth test, lognormal location alternative.
# NOTE(review): this appears to duplicate the lognormal halfspace-depth
# section above, and the result is printed but never written to a file —
# confirm which copy is the intended one.
d=c(10, 30, 50, 70, 100)
m=60
n=40
iter=500
h=2
power=vector(length=5)
rp=rp1=vector(length=iter)
for(j in 1:5)
{
# NOTE(review): re-allocates rp (already allocated above) and creates
# rpnull, which is never used afterwards.
rp=rpnull=vector(length=iter)
# Log-scale mean shift defining the alternative.
delta1<-h*rep(1, d[j])/sqrt(m+n)
mu0=rep(0, d[j])
mu1=delta1
sigma0=diag(rep(1, d[j]))
# Null distribution: both samples lognormal with log-mean mu0.
for(i in 1:iter)
{
points1<-exp(rmnorm(m, mu0, sigma0))
points2<-exp(rmnorm(n, mu0, sigma0))
# Tukey (halfspace) depth of each point, relative to sample 1.
a<-mdepth.TD(points1, points1, scale=TRUE)$dep
b<-mdepth.TD(points2, points1, scale=TRUE)$dep
rp1[i]<-sum(outer(a,b,"<")*1)/(m*n)
}
# Alternative: log-mean shift mu1 in the second sample.
for(i in 1:iter)
{
points1<-exp(rmnorm(m, mu0, sigma0))
points2<-exp(rmnorm(n, mu1, sigma0))
a<-mdepth.TD(points1, points1, scale=TRUE)$dep
b<-mdepth.TD(points2, points1, scale=TRUE)$dep
rp[i]<-sum(outer(a,b,"<")*1)/(m*n)
}
# Two-sided empirical power at level 0.05.
power[j]<-(length(which(rp> quantile(rp1, 0.975)))+length(which(rp < quantile(rp1, 0.025))))/iter
print(power[j])
}
library(cluster)
library(mnormt)
library(localdepth)
library(fda.usc)
########################################################
#Power of the test based on the Mahalanobis depth for the table in Figure 6 (normal location).
#This is the mdepth.MhD counterpart of the halfspace-depth (mdepth.TD) section above.
d=c(10, 30, 50, 70, 100)
m=60
n=40
iter=500
h=2
power=vector(length=5)
rp=rp1=vector(length=iter)
for(j in 1:5)
{
# Mean-shift vector defining the alternative.
delta1<-h*rep(1, d[j])/sqrt(m+n)
mu0=rep(0, d[j])
mu1=delta1
sigma0=diag(rep(1, d[j]))
# Null distribution: both samples N(mu0, I).
for(i in 1:iter)
{
points1<-rmnorm(m, mu0, sigma0)
points2<-rmnorm(n, mu0, sigma0)
# Mahalanobis depth of each point, relative to sample 1.
a<-mdepth.MhD(points1, points1, scale=TRUE)$dep
b<-mdepth.MhD(points2, points1, scale=TRUE)$dep
rp1[i]<-sum(outer(a,b,"<")*1)/(m*n)
}
# Alternative: second sample mean-shifted by mu1.
for(i in 1:iter)
{
points1<-rmnorm(m, mu0, sigma0)
points2<-rmnorm(n, mu1, sigma0)
a<-mdepth.MhD(points1, points1, scale=TRUE)$dep
b<-mdepth.MhD(points2, points1, scale=TRUE)$dep
rp[i]<-sum(outer(a,b,"<")*1)/(m*n)
}
# Two-sided empirical power at level 0.05.
power[j]<-(length(which(rp> quantile(rp1, 0.975)))+length(which(rp < quantile(rp1, 0.025))))/iter
print(power[j])
}
# BUG FIX: previously wrote to "Normal_TD_HD.txt", clobbering the
# halfspace-depth results produced earlier in this file.
write(as.vector(power), file="Normal_MhD_HD.txt", ncolumns=1)
############################################################
#Power of the test based on the Mahalanobis depth for the table in Figure 6 (normal scale).
#This is the mdepth.MhD counterpart of the halfspace-depth (mdepth.TD) section above.
library(cluster)
library(mnormt)
library(localdepth)
library(fda.usc)
d=c(10, 30, 50, 70, 100)
m=60
n=40
iter=500
h=2
power=vector(length=5)
rp=rp1=vector(length=iter)
for(j in 1:5)
{
mu0=rep(0, d[j])
# Scale inflation for the alternative covariance.
delta1<-h/(m+n)^(1/2)
sigma0=diag(rep(1, d[j]))
sigma1=sigma0+delta1*diag(rep(1, d[j]))
# Null distribution: both samples N(mu0, sigma0).
for(i in 1:iter)
{
points1<-rmnorm(m, mu0, sigma0)
points2<-rmnorm(n, mu0, sigma0)
# Mahalanobis depth of each point, relative to sample 1.
a<-mdepth.MhD(points1, points1, scale=TRUE)$dep
b<-mdepth.MhD(points2, points1, scale=TRUE)$dep
rp1[i]<-sum(outer(a,b,"<")*1)/(m*n)
}
# Alternative: second sample has inflated covariance sigma1.
for(i in 1:iter)
{
points1<-rmnorm(m, mu0, sigma0)
points2<-rmnorm(n, mu0, sigma1)
a<-mdepth.MhD(points1, points1, scale=TRUE)$dep
b<-mdepth.MhD(points2, points1, scale=TRUE)$dep
rp[i]<-sum(outer(a,b,"<")*1)/(m*n)
}
# Two-sided empirical power at level 0.05.
power[j]<-(length(which(rp> quantile(rp1, 0.975)))+length(which(rp < quantile(rp1, 0.025))))/iter
print(power[j])
}
# BUG FIX: previously wrote to "Scale_TD_HD.txt", clobbering the
# halfspace-depth results produced earlier in this file.
write(as.vector(power), file="Scale_MhD_HD.txt", ncolumns=1)
############################################################
#Power of the test based on the Mahalanobis depth for the table in Figure 7 (lognormal location).
#This is the mdepth.MhD counterpart of the halfspace-depth (mdepth.TD) section above.
library(cluster)
library(mnormt)
library(localdepth)
library(fda.usc)
d=c(10, 30, 50, 70, 100)
m=60
n=40
iter=500
h=2
power=vector(length=5)
rp=rp1=vector(length=iter)
for(j in 1:5)
{
# Log-scale mean shift defining the alternative. (The redundant
# `rp=rpnull=vector(length=iter)` re-allocation was removed: rp is
# allocated above and rpnull was never used.)
delta1<-h*rep(1, d[j])/sqrt(m+n)
mu0=rep(0, d[j])
mu1=delta1
sigma0=diag(rep(1, d[j]))
# Null distribution: both samples lognormal with log-mean mu0.
for(i in 1:iter)
{
points1<-exp(rmnorm(m, mu0, sigma0))
points2<-exp(rmnorm(n, mu0, sigma0))
# Mahalanobis depth of each point, relative to sample 1.
a<-mdepth.MhD(points1, points1, scale=TRUE)$dep
b<-mdepth.MhD(points2, points1, scale=TRUE)$dep
rp1[i]<-sum(outer(a,b,"<")*1)/(m*n)
}
# Alternative: log-mean shift mu1 in the second sample.
for(i in 1:iter)
{
points1<-exp(rmnorm(m, mu0, sigma0))
points2<-exp(rmnorm(n, mu1, sigma0))
a<-mdepth.MhD(points1, points1, scale=TRUE)$dep
b<-mdepth.MhD(points2, points1, scale=TRUE)$dep
rp[i]<-sum(outer(a,b,"<")*1)/(m*n)
}
# Two-sided empirical power at level 0.05.
power[j]<-(length(which(rp> quantile(rp1, 0.975)))+length(which(rp < quantile(rp1, 0.025))))/iter
print(power[j])
}
# BUG FIX: the file name was "Logormal_TD_HD.txt" — misspelled and labelled
# TD although this section uses the Mahalanobis depth (mdepth.MhD).
write(as.vector(power), file="Lognormal_MhD_HD.txt", ncolumns=1)
